import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import os
import cv2
import warnings
import cv2 as cv
from glob import glob
from termcolor import colored
warnings.filterwarnings('ignore')
import itertools
import PIL.Image as Image
from tqdm import tqdm
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow import keras
from keras.utils import np_utils
from tensorflow.keras import layers
from tensorflow.keras.activations import softmax
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers,regularizers
from tensorflow.python.keras.models import Sequential
from tensorflow.keras.layers import Reshape, Activation
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Dropout,BatchNormalization, Activation
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPool2D
from tensorflow.python.keras.wrappers.scikit_learn import KerasRegressor
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
# Report the TensorFlow version in use (the recorded run used 2.4.1).
print(tf.__version__)
2.4.1
# Mount Google Drive so the image dataset stored there is reachable (Colab only).
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
# Locate the car-image folder on Drive and list its files.
print(colored('\x1B[1mImporting data from drive.....','blue'))
cars = '/content/drive/MyDrive/Files/Cars Images'
n_cars = os.listdir(cars)
Importing data from drive.....
# Report how many files the cars image folder contains.
# FIX: the original messages called the same folder "train dataset" and then
# "test dataset"; both now name it consistently.
print(colored('\x1B[1m Getting details of cars dataset.....','blue'))
print('\x1B[1m Total no of images inside cars dataset are',len(n_cars))
Getting details of train dataset..... Total no of images inside test dataset are 15
# Read every .jpg in the cars folder, resize to the 224x224 input size
# MobileNet expects, and stack into one (N, 224, 224, 3) uint8 array.
print(colored('\x1B[1mChanging image to array form.....','blue'))
car_path = '/content/drive/MyDrive/Files/Cars Images/*.jpg'
cars = glob(car_path)
car_images = []
for car in cars:
    img = cv2.imread(car)
    # FIX: cv2.imread returns None for unreadable/corrupt files; skip those
    # instead of crashing inside cv2.resize.
    if img is None:
        continue
    car_images.append(cv2.resize(img, (224, 224)))
car_I = np.asarray(car_images)
Changing image to array form.....
# Section header for the visualization step.
print(colored('\x1B[1mVisualizing the images.....','blue'))
Visualizing the images.....
# Display each car image in its own full figure.
# BUG FIX: the original wrote `n=+1` (which assigns +1) where `n+=1` was
# intended; since a fresh figure is created per image anyway, the subplot
# counter was dead weight and each image only filled a quarter of the figure.
for x in range(len(car_I)):
    print('\t Image No.', x + 1)
    plt.figure(figsize=(10, 10))
    plt.imshow(car_I[x])
    plt.show()
    print('\n')
Image No. 1
Image No. 2
Image No. 3
Image No. 4
Image No. 5
Image No. 6
Image No. 7
Image No. 8
Image No. 9
Image No. 10
Image No. 11
Image No. 12
Image No. 13
Image No. 14
Image No. 15
# Inspect the stacked image tensor: (num_images, height, width, channels).
car_I.shape
(15, 224, 224, 3)
print(colored('\x1B[1mNormalizing the pixels of images.....','blue'))
# Scale 8-bit pixel values from [0, 255] into [0, 1].
car_I = car_I/255
Normalizing the pixels of images.....
# Alias the normalized image array as the feature tensor X.
X=car_I
print(colored('\x1B[1mBuilding image dataset.....','green'))
Building image dataset.....
# Pretrained MobileNetV2 ImageNet classifier from TF-Hub; it emits a
# (1, 1001) logit vector per image. (FIX: "calssification" typo in message.)
print(colored('\x1B[1mInitializing Mobilenet classification model.....','blue'))
model = tf.keras.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2",)])
Initializing Mobilenet calssification model.....
print(colored('\x1B[1mPredicting the images through the model.....','blue'))
# Run each image through MobileNet one at a time; np.newaxis adds the batch
# dimension, so each result is a (1, 1001) logit array.
results = []
for i in range(0, 15):
    result = model.predict(X[i][np.newaxis, ...])
    results.append(result)
Predicting the images through the model.....
# Show each prediction's shape: (1, 1001) logits over the ImageNet classes.
# FIX: "Sahpe"/"predicition" typos in the printed message.
print(colored('\x1B[1mNew shape of the images.....','blue'))
for i in range(0, 15):
    print('Shape of image', i + 1, 'after prediction', results[i].shape)
New shape of the images.....
Sahpe of image 1 after predicition (1, 1001)
Sahpe of image 2 after predicition (1, 1001)
Sahpe of image 3 after predicition (1, 1001)
Sahpe of image 4 after predicition (1, 1001)
Sahpe of image 5 after predicition (1, 1001)
Sahpe of image 6 after predicition (1, 1001)
Sahpe of image 7 after predicition (1, 1001)
Sahpe of image 8 after predicition (1, 1001)
Sahpe of image 9 after predicition (1, 1001)
Sahpe of image 10 after predicition (1, 1001)
Sahpe of image 11 after predicition (1, 1001)
Sahpe of image 12 after predicition (1, 1001)
Sahpe of image 13 after predicition (1, 1001)
Sahpe of image 14 after predicition (1, 1001)
Sahpe of image 15 after predicition (1, 1001)
print(colored('\x1B[1mThe top class ID can be found with argmax:.....','blue'))
# Top-1 ImageNet class id for every image's logit vector.
predicted_classes = [np.argmax(results[i][0], axis=-1) for i in range(15)]
The top class ID can be found with argmax:.....
print(colored('\x1B[1mDownloading the Imagenet data for predicting the labels.....','blue'))
# Fetch the canonical ImageNet label file (index-aligned with the classifier's
# 1001 outputs) and load it as an array of class-name strings.
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
Downloading the Imagenet data for predicting the labels.....
print(colored('\x1B[1mPredicting the labels.....','blue'))
# Show each image titled with its predicted ImageNet class name, and collect
# the names as pseudo-labels for the classifiers trained below.
labels=[]
for i in range(0, 15):
    plt.imshow(X[i])
    plt.axis('off')
    predicted_class_name = imagenet_labels[predicted_classes[i]]
    _ = plt.title("Prediction: " + predicted_class_name.title())
    labels.append(predicted_class_name)
    plt.show()
Predicting the labels.....
# Section header: start building the image classifier.
# FIX: "calssifier" typo in the printed message.
print(colored('\x1B[1mBuilding image classifier.....','green'))
Building image calssifier.....
print(colored('\x1B[1mLabels.....','blue'))
# Alias the pseudo-labels; the bare expression displays them in the notebook.
a=labels
a
Labels.....
['barn', 'beach wagon', 'minivan', 'minivan', 'minivan', 'minivan', 'minivan', 'convertible', 'sports car', 'sports car', 'minivan', 'pickup', 'convertible', 'beach wagon', 'limousine']
print(colored('\x1B[1mUnique labels are.....','blue'))
# Number of distinct pseudo-label classes (7 in the recorded run).
len(np.unique(a))
Unique labels are.....
7
# NOTE(review): `Y` here is a LabelEncoder instance, not a target vector — the
# name shadows the usual y/Y convention and is read again later for `.classes_`.
Y = LabelEncoder()
Y.fit(a)
print('Classes'+str(Y.classes_))
Classes['barn' 'beach wagon' 'convertible' 'limousine' 'minivan' 'pickup' 'sports car']
# Encode the string labels as integer ids 0..6.
y = Y.transform(a)
# NOTE(review): despite the name, `classes` holds y's shape tuple, not classes.
classes = y.shape
print(str(classes))
(15,)
print(colored('\x1B[1mLength of total image set','blue'))
# Number of images (15); reused below when flattening the tensor.
lenofimage = len(X)
lenofimage
Length of total image set
15
print(colored('\x1B[1mReshaping of image dataset','blue'))
# Flatten every image into one row: (15, 224*224*3) = (15, 150528).
x = np.asarray(X).reshape(lenofimage, -1)
print('\x1B[1m', x.shape)
Reshaping of image dataset (15, 150528)
print(colored('\x1B[1mChanging target labels from list to array','blue'))
#y=np.array(target_val)
# y is already a numpy array from LabelEncoder.transform; just show its shape.
print('\x1B[1m',y.shape)
Changing target labels from list to array (15,)
print(colored('\x1B[1mSpliting','blue'))
# 70/30 split of the flattened images: 10 train / 5 test rows.
X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.7, test_size=0.3, random_state=42)
# Dicts accumulating per-model train scores, test scores and accuracies,
# compared in the summary table at the end of the notebook.
score_train={}
score_test={}
accuracy={}
print(colored('\x1B[1mShape of datasets','green'))
print('Shape of X Train',X_train.shape)
print('Shape of Y Train',y_train.shape)
print('Shape of X Test',X_test.shape)
print('Shape of Y Test',y_test.shape)
Spliting Shape of datasets Shape of X Train (10, 150528) Shape of Y Train (10,) Shape of X Test (5, 150528) Shape of Y Test (5,)
print(colored('\x1B[1mSVM','blue'))
# Linear-kernel SVM with weak regularization (C=0.1).
# NOTE(review): this rebinds the name `svm`, shadowing the `sklearn.svm`
# module imported at the top of the file.
svm = SVC(kernel='linear', gamma= 'auto',C=.1)
svm.fit(X_train, y_train)
SVM
SVC(C=0.1, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
print(colored('\x1B[1mPredicition','blue'))
# Evaluate the SVM: test predictions plus mean accuracy on both splits.
y_pred = svm.predict(X_test)
model_train = svm.score(X_train,y_train)
model_test = svm.score(X_test,y_test)
score_train['SVM'] = model_train
score_test['SVM Test Score']=model_test
accuracy['SVM Accuracy'] = metrics.accuracy_score(y_test, y_pred)
Predicition
print(colored('\x1B[1mAccuracy','blue'))
# Train 1.0 vs test 0.2 in the recorded run — the SVM overfits the 10 samples.
print('Train Score:',model_train)
print('Test Score:',model_test)
Accuracy
Train Score: 1.0
Test Score: 0.2
print(colored('\x1B[1mClassification Report','blue'))
# Per-class precision/recall/F1 plus a raw count of misclassified test samples.
print(classification_report(y_test, y_pred))
cm_svc = (y_test != y_pred).sum()
print('\n Misclassified samples in SVM: {}'.format(cm_svc))
Classification Report
precision recall f1-score support
0 0.00 0.00 0.00 1
1 0.00 0.00 0.00 1
4 0.20 1.00 0.33 1
5 0.00 0.00 0.00 1
6 0.00 0.00 0.00 1
accuracy 0.20 5
macro avg 0.04 0.20 0.07 5
weighted avg 0.04 0.20 0.07 5
Misclassified samples in SVM: 4
print(colored('\x1B[1mRandom Forest','blue'))
# 25-tree forest; bootstrap disabled, so every tree sees the full train set.
rf = RandomForestClassifier(n_estimators= 25, max_features= 6, max_depth= 10, bootstrap= False)
Random Forest
print(colored('\x1B[1mPredicition','blue'))
# Fit and evaluate the forest on the same flattened-image splits as the SVM.
rf.fit(X_train,y_train)
y_pred = rf.predict(X_test)
model_train = rf.score(X_train,y_train)
model_test = rf.score(X_test,y_test)
score_train['Random Forest'] = model_train
score_test['Random Forest Test Score']=model_test
accuracy['Random Forest Accuracy'] = metrics.accuracy_score(y_test, y_pred)
Predicition
print(colored('\x1B[1mAccuracy','blue'))
# Train 1.0 vs test 0.0 in the recorded run — severe overfitting again.
print('Train Score:',model_train)
print('Test Score:',model_test)
Accuracy
Train Score: 1.0
Test Score: 0.0
print(colored('\x1B[1mClassification Report','blue'))
# Per-class metrics and the raw misclassification count for the forest.
print(classification_report(y_test, y_pred))
cm_rf = (y_test != y_pred).sum()
print('\n Misclassified samples in Random Forest: {}'.format(cm_rf))
Classification Report
precision recall f1-score support
0 0.00 0.00 0.00 1.0
1 0.00 0.00 0.00 1.0
4 0.00 0.00 0.00 1.0
5 0.00 0.00 0.00 1.0
6 0.00 0.00 0.00 1.0
accuracy 0.00 5.0
macro avg 0.00 0.00 0.00 5.0
weighted avg 0.00 0.00 0.00 5.0
Misclassified samples in Random Forest: 5
# Shallow decision tree; random_state pinned for reproducibility.
# FIX: the original header printed "CDecision Tree" (stray leading 'C').
print(colored('\x1B[1mDecision Tree','blue'))
dTree = DecisionTreeClassifier(min_samples_leaf=7, max_features=5, max_depth=5, criterion='gini', random_state=1)
CDecision Tree
print(colored('\x1B[1mPredicition','blue'))
# Fit and evaluate the tree on the same flattened-image splits.
dTree.fit(X_train,y_train)
y_pred = dTree.predict(X_test)
model_train = dTree.score(X_train,y_train)
model_test = dTree.score(X_test,y_test)
score_train['Decision Tree'] = model_train
score_test['Decision Tree Test Score']=model_test
accuracy['Decision Tree Accuracy'] = metrics.accuracy_score(y_test, y_pred)
Predicition
print(colored('\x1B[1mAccuracy','blue'))
# Train 0.5 vs test 0.2 in the recorded run.
print('Train Score:',model_train)
print('Test Score:',model_test)
Accuracy
Train Score: 0.5
Test Score: 0.2
print(colored('\x1B[1mClassification Report','blue'))
# Per-class metrics and the raw misclassification count for the tree.
print(classification_report(y_test, y_pred))
cm_dTree = (y_test != y_pred).sum()
print('\n Misclassified samples in Decision Tree: {}'.format(cm_dTree))
Classification Report
precision recall f1-score support
0 0.00 0.00 0.00 1
1 0.00 0.00 0.00 1
4 0.20 1.00 0.33 1
5 0.00 0.00 0.00 1
6 0.00 0.00 0.00 1
accuracy 0.20 5
macro avg 0.04 0.20 0.07 5
weighted avg 0.04 0.20 0.07 5
Misclassified samples in Decision Tree: 4
print(colored('\x1B[1mSetting labels to categorically labels','blue'))
# One-hot encode the 7 integer class ids for the keras models below.
y_c = to_categorical(y, num_classes=7)
Setting labels to categorically labels
print(colored('\x1B[1mSpliting','blue'))
# Re-split the flattened images against the one-hot targets for the dense NN.
X_train, X_test, y_train, y_test = train_test_split(x, y_c, train_size=0.7, test_size=0.3, random_state=42)
Spliting
# Sanity-check the split shapes: 10/5 rows, 7-wide one-hot targets.
print(colored('\x1B[1mShape of datasets','green'))
print('Shape of X Train',X_train.shape)
print('Shape of Y Train',y_train.shape)
print('Shape of X Test',X_test.shape)
print('Shape of Y Test',y_test.shape)
Shape of datasets
Shape of X Train (10, 150528)
Shape of Y Train (10, 7)
Shape of X Test (5, 150528)
Shape of Y Test (5, 7)
# Number of flattened features per image (150528).
# NOTE(review): the name `input` shadows the `input` builtin; it is read again
# when the NN is built below, so any rename must touch both places.
input = x.shape[1]
input
150528
# Section header for the dense neural-network model.
print(colored('\x1B[1mNeural Network Model Building','blue'))
Neural Network Model Building
hidden_nodes = 256
output_nodes = 7
# Two-hidden-layer MLP over the 150528 flattened pixel features.
model = Sequential()
model.add(Dense(hidden_nodes, input_shape=(input,), activation='relu'))
model.add(Dense(hidden_nodes, activation='relu'))
model.add(Dense(output_nodes, activation='softmax', kernel_regularizer=regularizers.l2(0)))
# FIX: dropped the unused Adam optimizer (`opt`) the original built here; the
# model is compiled with SGD, and `opt` was redefined before any later use.
sgd = optimizers.SGD(lr=.001, decay=1e-6, momentum=0.9)
# Compile model
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# Fit the model; batch_size exceeds the 10 train samples, so each epoch is one step.
model.fit(X_train, y_train, epochs=100, batch_size=200, verbose= 1)
# evaluate() returns [loss, accuracy]; record only the accuracy so these dicts
# stay consistent with the scalar scores stored for the sklearn models.
score = model.evaluate(X_test, y_test, verbose=0)
score_train['NN'] = model.evaluate(X_train, y_train, verbose=0)[1]
score_test['NN Test Score'] = score[1]
accuracy['NN Accuracy'] = score[1]
Epoch 1/100 1/1 [==============================] - 1s 712ms/step - loss: 2.0955 - accuracy: 0.0000e+00 Epoch 2/100 1/1 [==============================] - 0s 156ms/step - loss: 1.3949 - accuracy: 0.5000 Epoch 3/100 1/1 [==============================] - 0s 162ms/step - loss: 1.3330 - accuracy: 0.4000 Epoch 4/100 1/1 [==============================] - 0s 132ms/step - loss: 1.0419 - accuracy: 0.5000 Epoch 5/100 1/1 [==============================] - 0s 127ms/step - loss: 0.6865 - accuracy: 0.7000 Epoch 6/100 1/1 [==============================] - 0s 128ms/step - loss: 0.4558 - accuracy: 0.9000 Epoch 7/100 1/1 [==============================] - 0s 147ms/step - loss: 0.3033 - accuracy: 0.9000 Epoch 8/100 1/1 [==============================] - 0s 131ms/step - loss: 0.2200 - accuracy: 0.9000 Epoch 9/100 1/1 [==============================] - 0s 128ms/step - loss: 0.0786 - accuracy: 1.0000 Epoch 10/100 1/1 [==============================] - 0s 135ms/step - loss: 0.0574 - accuracy: 1.0000 Epoch 11/100 1/1 [==============================] - 0s 133ms/step - loss: 0.0680 - accuracy: 1.0000 Epoch 12/100 1/1 [==============================] - 0s 146ms/step - loss: 0.0288 - accuracy: 1.0000 Epoch 13/100 1/1 [==============================] - 0s 135ms/step - loss: 0.0119 - accuracy: 1.0000 Epoch 14/100 1/1 [==============================] - 0s 126ms/step - loss: 0.0081 - accuracy: 1.0000 Epoch 15/100 1/1 [==============================] - 0s 136ms/step - loss: 0.0070 - accuracy: 1.0000 Epoch 16/100 1/1 [==============================] - 0s 131ms/step - loss: 0.0065 - accuracy: 1.0000 Epoch 17/100 1/1 [==============================] - 0s 152ms/step - loss: 0.0060 - accuracy: 1.0000 Epoch 18/100 1/1 [==============================] - 0s 142ms/step - loss: 0.0052 - accuracy: 1.0000 Epoch 19/100 1/1 [==============================] - 0s 130ms/step - loss: 0.0043 - accuracy: 1.0000 Epoch 20/100 1/1 [==============================] - 0s 127ms/step - loss: 0.0033 - accuracy: 1.0000 
Epoch 21/100 1/1 [==============================] - 0s 133ms/step - loss: 0.0025 - accuracy: 1.0000 Epoch 22/100 1/1 [==============================] - 0s 145ms/step - loss: 0.0018 - accuracy: 1.0000 Epoch 23/100 1/1 [==============================] - 0s 140ms/step - loss: 0.0014 - accuracy: 1.0000 Epoch 24/100 1/1 [==============================] - 0s 127ms/step - loss: 0.0011 - accuracy: 1.0000 Epoch 25/100 1/1 [==============================] - 0s 129ms/step - loss: 9.0680e-04 - accuracy: 1.0000 Epoch 26/100 1/1 [==============================] - 0s 133ms/step - loss: 7.7450e-04 - accuracy: 1.0000 Epoch 27/100 1/1 [==============================] - 0s 129ms/step - loss: 6.8118e-04 - accuracy: 1.0000 Epoch 28/100 1/1 [==============================] - 0s 142ms/step - loss: 6.1470e-04 - accuracy: 1.0000 Epoch 29/100 1/1 [==============================] - 0s 144ms/step - loss: 5.6594e-04 - accuracy: 1.0000 Epoch 30/100 1/1 [==============================] - 0s 124ms/step - loss: 5.2902e-04 - accuracy: 1.0000 Epoch 31/100 1/1 [==============================] - 0s 133ms/step - loss: 4.9987e-04 - accuracy: 1.0000 Epoch 32/100 1/1 [==============================] - 0s 125ms/step - loss: 4.7621e-04 - accuracy: 1.0000 Epoch 33/100 1/1 [==============================] - 0s 137ms/step - loss: 4.5635e-04 - accuracy: 1.0000 Epoch 34/100 1/1 [==============================] - 0s 126ms/step - loss: 4.3924e-04 - accuracy: 1.0000 Epoch 35/100 1/1 [==============================] - 0s 121ms/step - loss: 4.2389e-04 - accuracy: 1.0000 Epoch 36/100 1/1 [==============================] - 0s 127ms/step - loss: 4.0973e-04 - accuracy: 1.0000 Epoch 37/100 1/1 [==============================] - 0s 133ms/step - loss: 3.9637e-04 - accuracy: 1.0000 Epoch 38/100 1/1 [==============================] - 0s 131ms/step - loss: 3.8364e-04 - accuracy: 1.0000 Epoch 39/100 1/1 [==============================] - 0s 123ms/step - loss: 3.7145e-04 - accuracy: 1.0000 Epoch 40/100 1/1 
[==============================] - 0s 125ms/step - loss: 3.5967e-04 - accuracy: 1.0000 Epoch 41/100 1/1 [==============================] - 0s 133ms/step - loss: 3.4829e-04 - accuracy: 1.0000 Epoch 42/100 1/1 [==============================] - 0s 131ms/step - loss: 3.3725e-04 - accuracy: 1.0000 Epoch 43/100 1/1 [==============================] - 0s 127ms/step - loss: 3.2661e-04 - accuracy: 1.0000 Epoch 44/100 1/1 [==============================] - 0s 136ms/step - loss: 3.1630e-04 - accuracy: 1.0000 Epoch 45/100 1/1 [==============================] - 0s 131ms/step - loss: 3.0639e-04 - accuracy: 1.0000 Epoch 46/100 1/1 [==============================] - 0s 137ms/step - loss: 2.9688e-04 - accuracy: 1.0000 Epoch 47/100 1/1 [==============================] - 0s 133ms/step - loss: 2.8778e-04 - accuracy: 1.0000 Epoch 48/100 1/1 [==============================] - 0s 137ms/step - loss: 2.7907e-04 - accuracy: 1.0000 Epoch 49/100 1/1 [==============================] - 0s 126ms/step - loss: 2.7081e-04 - accuracy: 1.0000 Epoch 50/100 1/1 [==============================] - 0s 137ms/step - loss: 2.6297e-04 - accuracy: 1.0000 Epoch 51/100 1/1 [==============================] - 0s 131ms/step - loss: 2.5554e-04 - accuracy: 1.0000 Epoch 52/100 1/1 [==============================] - 0s 138ms/step - loss: 2.4846e-04 - accuracy: 1.0000 Epoch 53/100 1/1 [==============================] - 0s 122ms/step - loss: 2.4180e-04 - accuracy: 1.0000 Epoch 54/100 1/1 [==============================] - 0s 124ms/step - loss: 2.3552e-04 - accuracy: 1.0000 Epoch 55/100 1/1 [==============================] - 0s 129ms/step - loss: 2.2959e-04 - accuracy: 1.0000 Epoch 56/100 1/1 [==============================] - 0s 129ms/step - loss: 2.2405e-04 - accuracy: 1.0000 Epoch 57/100 1/1 [==============================] - 0s 131ms/step - loss: 2.1884e-04 - accuracy: 1.0000 Epoch 58/100 1/1 [==============================] - 0s 128ms/step - loss: 2.1394e-04 - accuracy: 1.0000 Epoch 59/100 1/1 
[==============================] - 0s 145ms/step - loss: 2.0933e-04 - accuracy: 1.0000 Epoch 60/100 1/1 [==============================] - 0s 127ms/step - loss: 2.0501e-04 - accuracy: 1.0000 Epoch 61/100 1/1 [==============================] - 0s 126ms/step - loss: 2.0095e-04 - accuracy: 1.0000 Epoch 62/100 1/1 [==============================] - 0s 126ms/step - loss: 1.9715e-04 - accuracy: 1.0000 Epoch 63/100 1/1 [==============================] - 0s 127ms/step - loss: 1.9359e-04 - accuracy: 1.0000 Epoch 64/100 1/1 [==============================] - 0s 127ms/step - loss: 1.9024e-04 - accuracy: 1.0000 Epoch 65/100 1/1 [==============================] - 0s 125ms/step - loss: 1.8709e-04 - accuracy: 1.0000 Epoch 66/100 1/1 [==============================] - 0s 127ms/step - loss: 1.8414e-04 - accuracy: 1.0000 Epoch 67/100 1/1 [==============================] - 0s 138ms/step - loss: 1.8137e-04 - accuracy: 1.0000 Epoch 68/100 1/1 [==============================] - 0s 138ms/step - loss: 1.7877e-04 - accuracy: 1.0000 Epoch 69/100 1/1 [==============================] - 0s 125ms/step - loss: 1.7632e-04 - accuracy: 1.0000 Epoch 70/100 1/1 [==============================] - 0s 133ms/step - loss: 1.7402e-04 - accuracy: 1.0000 Epoch 71/100 1/1 [==============================] - 0s 130ms/step - loss: 1.7183e-04 - accuracy: 1.0000 Epoch 72/100 1/1 [==============================] - 0s 126ms/step - loss: 1.6983e-04 - accuracy: 1.0000 Epoch 73/100 1/1 [==============================] - 0s 136ms/step - loss: 1.6790e-04 - accuracy: 1.0000 Epoch 74/100 1/1 [==============================] - 0s 132ms/step - loss: 1.6606e-04 - accuracy: 1.0000 Epoch 75/100 1/1 [==============================] - 0s 127ms/step - loss: 1.6438e-04 - accuracy: 1.0000 Epoch 76/100 1/1 [==============================] - 0s 126ms/step - loss: 1.6274e-04 - accuracy: 1.0000 Epoch 77/100 1/1 [==============================] - 0s 135ms/step - loss: 1.6120e-04 - accuracy: 1.0000 Epoch 78/100 1/1 
[==============================] - 0s 125ms/step - loss: 1.5981e-04 - accuracy: 1.0000 Epoch 79/100 1/1 [==============================] - 0s 127ms/step - loss: 1.5841e-04 - accuracy: 1.0000 Epoch 80/100 1/1 [==============================] - 0s 125ms/step - loss: 1.5712e-04 - accuracy: 1.0000 Epoch 81/100 1/1 [==============================] - 0s 126ms/step - loss: 1.5587e-04 - accuracy: 1.0000 Epoch 82/100 1/1 [==============================] - 0s 140ms/step - loss: 1.5470e-04 - accuracy: 1.0000 Epoch 83/100 1/1 [==============================] - 0s 126ms/step - loss: 1.5361e-04 - accuracy: 1.0000 Epoch 84/100 1/1 [==============================] - 0s 126ms/step - loss: 1.5256e-04 - accuracy: 1.0000 Epoch 85/100 1/1 [==============================] - 0s 125ms/step - loss: 1.5153e-04 - accuracy: 1.0000 Epoch 86/100 1/1 [==============================] - 0s 156ms/step - loss: 1.5059e-04 - accuracy: 1.0000 Epoch 87/100 1/1 [==============================] - 0s 129ms/step - loss: 1.4966e-04 - accuracy: 1.0000 Epoch 88/100 1/1 [==============================] - 0s 129ms/step - loss: 1.4881e-04 - accuracy: 1.0000 Epoch 89/100 1/1 [==============================] - 0s 134ms/step - loss: 1.4795e-04 - accuracy: 1.0000 Epoch 90/100 1/1 [==============================] - 0s 128ms/step - loss: 1.4711e-04 - accuracy: 1.0000 Epoch 91/100 1/1 [==============================] - 0s 130ms/step - loss: 1.4636e-04 - accuracy: 1.0000 Epoch 92/100 1/1 [==============================] - 0s 130ms/step - loss: 1.4563e-04 - accuracy: 1.0000 Epoch 93/100 1/1 [==============================] - 0s 128ms/step - loss: 1.4491e-04 - accuracy: 1.0000 Epoch 94/100 1/1 [==============================] - 0s 129ms/step - loss: 1.4422e-04 - accuracy: 1.0000 Epoch 95/100 1/1 [==============================] - 0s 127ms/step - loss: 1.4356e-04 - accuracy: 1.0000 Epoch 96/100 1/1 [==============================] - 0s 130ms/step - loss: 1.4292e-04 - accuracy: 1.0000 Epoch 97/100 1/1 
[==============================] - 0s 134ms/step - loss: 1.4230e-04 - accuracy: 1.0000 Epoch 98/100 1/1 [==============================] - 0s 136ms/step - loss: 1.4168e-04 - accuracy: 1.0000 Epoch 99/100 1/1 [==============================] - 0s 128ms/step - loss: 1.4111e-04 - accuracy: 1.0000 Epoch 100/100 1/1 [==============================] - 0s 127ms/step - loss: 1.4055e-04 - accuracy: 1.0000
# `score` is the [loss, accuracy] pair from model.evaluate on the test split.
print(colored('\x1B[1mScore','green'),score)
Score [9.118935585021973, 0.0]
print(colored('\x1B[1mSpliting Train and Test sets','blue'))
# Split the ORIGINAL 4-D image tensor X (not the flattened x) for the CNN.
X_train,X_test,y_train,y_test=train_test_split(X,y_c,test_size=0.3,random_state=7)
Spliting Train and Test sets
# Sanity-check the CNN split shapes: (10, 224, 224, 3) train / (5, 224, 224, 3) test.
print(colored('\x1B[1mShape of datasets','green'))
print('Shape of X Train',X_train.shape)
print('Shape of Y Train',y_train.shape)
print('Shape of X Test',X_test.shape)
print('Shape of Y Test',y_test.shape)
Shape of datasets
Shape of X Train (10, 224, 224, 3)
Shape of Y Train (10, 7)
Shape of X Test (5, 224, 224, 3)
Shape of Y Test (5, 7)
print(colored('\x1B[1mPreprocessing Image and applying augmentation','blue'))
# Heavy augmentation to stretch the tiny 10-image training set.
image_generator_train = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=20, width_shift_range=0.2, height_shift_range=0.2,
    shear_range=0.3, zoom_range=0.5, horizontal_flip=True, vertical_flip=True,
    brightness_range=[0.2, 0.8], featurewise_center=False, samplewise_center=False,
    featurewise_std_normalization=False, samplewise_std_normalization=False,
    fill_mode="reflect")
# fit() only matters for featurewise statistics (all disabled here); kept for parity.
image_generator_train.fit(X_train, augment=True)
augmented_train = image_generator_train.flow(np.array(X_train), np.array(y_train))
# BUG FIX: X was already scaled into [0, 1] earlier (car_I/255), so the
# original rescale=1./255 on this generator divided the test images a second
# time, shrinking them to [0, 1/255] and making validation inputs inconsistent
# with the training inputs.
image_generator_test = tf.keras.preprocessing.image.ImageDataGenerator()
image_generator_test.fit(X_test, augment=True)
augmented_test = image_generator_test.flow(np.array(X_test), np.array(y_test))
print(colored('\x1B[1mFitting Train set in Preprocessing and Augmentation features','green'))
Preprocessing Image and applying augmentation Fitting Train set in Preprocessing and Augmentation featuers
# Section header for the convolutional model.
print(colored('\x1B[1mBuilding CNN Model','blue'))
Building CNN Model
# Initialising the CNN model
model = Sequential()
# Add a Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
model.add(Conv2D(32, (3, 3), input_shape = (224, 224, 3), activation = 'relu', padding = 'same'))
# Add a Max Pooling layer of size 2X2 (halves the spatial dimensions)
model.add(MaxPool2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
model.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'same'))
# Adding another pooling layer
model.add(MaxPool2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
model.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'same'))
# Adding another pooling layer
model.add(MaxPool2D(pool_size = (2, 2)))
# Flattening the layer before fully connected layers
model.add(Flatten())
# Adding a fully connected layer with 512 neurons
model.add(Dense(units = 512, activation = 'relu'))
# Adding dropout with probability 0.5
model.add(Dropout(0.5))
# Adding a fully connected layer with 128 neurons
model.add(Dense(units = 128, activation = 'relu'))
# The final output layer with 7 neurons (one per class) for the categorical
# classification. (The original comment said 5 neurons; the layer uses 7.)
model.add(Dense(units = 7, activation = 'softmax'))
from tensorflow.keras.optimizers import Adam
print(colored('\x1B[1mCompiling Model','blue'))
# NOTE(review): `lr`, `decay` and `epsilon=None` are legacy Adam arguments;
# they work in the TF 2.4 environment recorded above but newer Keras prefers
# `learning_rate` — confirm before upgrading.
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.001, amsgrad=False)
model.compile(optimizer = opt, loss = 'categorical_crossentropy', metrics = ['accuracy'])
Compiling Model
# Section header for CNN training.
print(colored('\x1B[1mFiting the Model','blue'))
Fiting the Model
# Train the CNN for 5 epochs on the augmented generators; keep the history.
history = model.fit(augmented_train,epochs=5,shuffle=True,validation_data=augmented_test)
Epoch 1/5 1/1 [==============================] - 2s 2s/step - loss: 30.2459 - accuracy: 0.1000 - val_loss: 1.9473 - val_accuracy: 0.0000e+00 Epoch 2/5 1/1 [==============================] - 1s 919ms/step - loss: 298.8923 - accuracy: 0.1000 - val_loss: 1.9427 - val_accuracy: 0.2000 Epoch 3/5 1/1 [==============================] - 1s 943ms/step - loss: 124.0225 - accuracy: 0.2000 - val_loss: 1.9427 - val_accuracy: 0.2000 Epoch 4/5 1/1 [==============================] - 1s 914ms/step - loss: 94.3477 - accuracy: 0.2000 - val_loss: 1.9430 - val_accuracy: 0.0000e+00 Epoch 5/5 1/1 [==============================] - 1s 929ms/step - loss: 27.6467 - accuracy: 0.4000 - val_loss: 1.9431 - val_accuracy: 0.0000e+00
# Per-epoch training history (loss/accuracy/val_loss/val_accuracy) as a table.
pd.DataFrame(history.history)
| loss | accuracy | val_loss | val_accuracy | |
|---|---|---|---|---|
| 0 | 30.245926 | 0.1 | 1.947283 | 0.0 |
| 1 | 298.892334 | 0.1 | 1.942688 | 0.2 |
| 2 | 124.022522 | 0.2 | 1.942725 | 0.2 |
| 3 | 94.347733 | 0.2 | 1.942997 | 0.0 |
| 4 | 27.646656 | 0.4 | 1.943134 | 0.0 |
# evaluate() returns [loss, accuracy]; record only the accuracy so these dicts
# stay consistent with the scalar scores stored for the sklearn models.
score = model.evaluate(X_test, y_test, verbose=0)
score_train['CNN'] = model.evaluate(X_train, y_train, verbose=0)[1]
score_test['CNN Test Score'] = score[1]
accuracy['CNN Accuracy'] = score[1]
# `score` is [loss, accuracy] for the CNN on the raw (non-augmented) test split.
print(colored('\x1B[1mScore','green'),score)
Score [1.8442113399505615, 0.6000000238418579]
print('''\n\033[1m''' + '''Displaying and comparing all the models designed with their train and test accuracies''' + '''\033[0m''')
# BUG FIX: the original dict literal used the key 'Train Score' twice, so the
# train scores were silently overwritten by the test scores and the summary
# table never showed a 'Test Score' column.
results = pd.DataFrame({'Model': score_train.keys(),
                        'Train Score': score_train.values(),
                        'Test Score': score_test.values(),
                        'Accuracy': accuracy.values()})
results
Displaying and comparing all the models designed with their train and test accuracies
| Model | Train Score | Accuracy | |
|---|---|---|---|
| 0 | SVM | 0.2 | 0.2 |
| 1 | Random Forest | 0 | 0 |
| 2 | Decision Tree | 0.2 | 0.2 |
| 3 | NN | [9.118935585021973, 0.0] | [9.118935585021973, 0.0] |
| 4 | CNN | [1.8442113399505615, 0.6000000238418579] | [1.8442113399505615, 0.6000000238418579] |
print(colored('\x1B[1m\t Loss analysis graph of Model\n','green'))
# Training vs. validation loss per epoch for the CNN run.
hist = history.history
for series in ('loss', 'val_loss'):
    plt.plot(hist[series])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
Loss analysis graph of Model
print(colored('\x1B[1m\t Accuracy analysis graph of Model\n','green'))
# Accuracy (as a percentage) per epoch, training vs. validation.
hist = history.history
plt.plot(100 * np.asarray(hist['accuracy']))
plt.plot(100 * np.asarray(hist['val_accuracy']))
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['train', 'validation'])
plt.title('Accuracy over epochs')
plt.show()
Accuracy analysis graph of Model
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Greens):
    """Render a confusion matrix as a heat map with a number in every cell.

    Parameters
    ----------
    cm : 2-D array of counts, e.g. from sklearn.metrics.confusion_matrix.
    classes : sequence of tick labels, index-aligned with cm's rows/columns.
    normalize : when True, convert each row into proportions of its row total.
    title : figure title.
    cmap : matplotlib colormap used for the heat map.
    """
    # BUG FIX: the original normalized cm AFTER plt.imshow, so with
    # normalize=True the heat map showed raw counts while the cell text showed
    # proportions. Normalize first so the image, the contrast threshold and
    # the printed values all use the same matrix.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig = plt.figure(figsize=(10,10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    # Pick white text on dark cells, black on light cells, for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
print(colored('\x1B[1mPredict the values from the test data','blue'))
# Class probabilities for the test images; argmax converts both the predicted
# rows and the one-hot ground-truth rows back into integer class ids.
y_pred = model.predict(X_test)
y_pred_Classes = np.argmax(y_pred, axis = 1)
trueY = np.argmax(y_test, axis = 1)
Predict the values from the test data
print(colored('\x1B[1mConfusion Matrix of Model','blue'))
# Confusion matrix of true vs predicted class ids, plotted with the original
# string class names recovered from the LabelEncoder (`Y.classes_`).
confusionMTX = confusion_matrix(trueY, y_pred_Classes)
plot_confusion_matrix(confusionMTX, classes = Y.classes_)
Confusion Matrix of Model
The accuracy of every model turns out to be very poor.
Still, it is not possible to build an image classifier without labels.
To build an image classifier, labels are needed.
To label the images, an image dataset must first be built, which can only be done by using a pretrained classifier model and its data.